# # use_pipeline = "{{es_pipeline}}"
# # default_pipeline = "my_pipeline"
# #
-# # Custom HTTP headers
-# # To pass custom HTTP headers please define it in a given below section
+# ## Custom HTTP Headers
+# ## To pass custom HTTP headers please define it in a given below section
# # [outputs.elasticsearch.headers]
-# # "X-Custom-Header" = "custom-value"
+# # "X-Custom-Header" = ["custom-value1", "custom-value2"]
#
# ## Template Index Settings
# ## Overrides the template settings.index section with any provided options.
# ## Kafka topic for producer messages
# topic = "telegraf"
#
-# ## The value of this tag will be used as the topic. If not set the 'topic'
-# ## option is used.
+# ## Tag value to be used as the topic. If not set or the tag does not exist,
+# ## the 'topic' option is used.
# # topic_tag = ""
#
# ## If true, the 'topic_tag' will be removed from the metric.
# ## OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI, AWS-MSK-IAM
# # sasl_mechanism = ""
#
-# ## used if sasl_mechanism is GSSAPI
+# ## Used if sasl_mechanism is GSSAPI
# # sasl_gssapi_service_name = ""
# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
# ## Access token used if sasl_mechanism is OAUTHBEARER
# # sasl_access_token = ""
#
-# ## used if sasl_mechanism is AWS-MSK-IAM
+# ## Used if sasl_mechanism is AWS-MSK-IAM
# # sasl_aws_msk_iam_region = ""
# ## for profile based auth
# ## sasl_aws_msk_iam_profile = ""
# ## SASL protocol version. When connecting to Azure EventHub set to 0.
# # sasl_version = 1
#
-# # Disable Kafka metadata full fetch
+# ## Disable Kafka metadata full fetch
# # metadata_full = false
#
# ## Maximum number of retries for metadata operations including
# ## Optional NATS 2.0 and NATS NGS compatible user credentials
# # credentials = "/etc/telegraf/nats.creds"
#
-# ## NATS subject for producer messages
-# ## For jetstream this is also the subject where messages will be published
+# ## NATS subject for producer messages.
+# ## This field can be a static string or a Go template, see README for details.
+# ## Incompatible with `use_batch_format`
# subject = "telegraf"
#
# ## Use Transport Layer Security
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
+# ## Use batch serialization instead of per metric. The batch format allows for the
+# ## production of batch output formats and may more efficiently encode and write metrics.
+# # use_batch_format = false
+#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## Jetstream specific configuration. If not nil, it will assume Jetstream context.
# ## Since this is a table, it should be present at the end of the plugin section. Else you can use inline table format.
# # [outputs.nats.jetstream]
-# ## Name of the stream, required when using jetstream. Telegraf will
-# ## use the union of the above subject and below the subjects array.
+# ## Name of the stream, required when using jetstream.
# # name = ""
+# ## List of subjects to register on the stream
# # subjects = []
#
# ## Use asynchronous publishing for higher throughput, but note that it does not guarantee order within batches.
# # allow_rollup_hdrs = false
# # allow_direct = true
# # mirror_direct = false
+#
+# ## Disable creating the stream but assume the stream is managed externally
+# ## and already exists. This will make the plugin fail if the stream does not exist.
+# # disable_stream_creation = false
# # Send aggregated metrics to Nebius.Cloud Monitoring
# ## Initialization SQL
# # init_sql = ""
#
+# ## Send metrics with the same columns and the same table as batches using prepared statements
+# # batch_transactions = false
+#
# ## Maximum amount of time a connection may be idle. "0s" means connections are
# ## never closed due to idle time.
# # connection_max_idle_time = "0s"
# ## processors.converter after this one, specifying the order attribute.
+# # Round numerical fields
+# [[processors.round]]
+# ## Precision to round to.
+# ## A positive number indicates rounding to the right of the decimal separator (i.e. the fractional part).
+# ## A negative number indicates rounding to the left of the decimal separator.
+# # precision = 0
+#
+# ## Round only numeric fields matching the filter criteria below.
+# ## Excludes takes precedence over includes.
+# # include_fields = ["*"]
+# # exclude_fields = []
+
+
# # Add the S2 Cell ID as a tag based on latitude and longitude fields
# [[processors.s2geo]]
# ## The name of the lat and lon fields containing WGS-84 latitude and
# ## If true, collect metrics from Go's runtime.metrics. For a full list see:
# ## https://pkg.go.dev/runtime/metrics
# # collect_gostats = false
+#
+# ## Collect statistics per plugin instance and not per plugin type
+# # per_instance = false
# # Monitors internet speed using speedtest.net service
# "/etc/ssl/certs/ssl-cert-snakeoil.pem",
# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem",
# "jks:///etc/mycerts/keystore.jks",
-# "pkcs12:///etc/mycerts/keystore.p12"]
+# "pkcs12:///etc/mycerts/keystore.p12",
+# "wincertstore://machine:ROOT", "wincertstore://user:CA"]
#
# ## Timeout for SSL connection
# # timeout = "5s"
# ## For each combination a field is created.
# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
# # sdparam_separator = "_"
+#
+# ## Maximum length allowed for a single message (in bytes when no unit specified)
+# ## Only applies to octet-counting framing.
+# # max_message_length = "8KiB"
# # Gather information about systemd-unit states
# # timeout = "5s"
+# ## Gather CPU metrics using Turbostat
+# [[inputs.turbostat]]
+# ## Path to the Turbostat executable if not in the PATH
+# # path = "/usr/bin/turbostat"
+#
+# ## Turbostat measurement interval
+# # interval = "10s"
+#
+# ## Use sudo to run the Turbostat executable
+# # use_sudo = false
+
+
# # Read metrics from the Vault API
# [[inputs.vault]]
# ## URL for the Vault agent